In [1]:
%matplotlib inline
import sys
sys.path.append('/apps')
import django
django.setup()
from drivers.graph_models import TreeNode, Order, Family, graph, pickNode
from traversals.strategies import sumTrees, UniformRandomSampleForest
Here I'm following the tutorial from: https://www.tensorflow.org/get_started/get_started
In [2]:
## importation
import tensorflow as tf
In [3]:
# Two constant nodes in the computation graph; printing them shows the
# Tensor objects, not their values (evaluation requires a Session).
node2 = tf.constant(4.0)  # dtype tf.float32 inferred from the literal
node1 = tf.constant(3.0, dtype=tf.float32)
print(node1, node2)
The nodes are defined abstractly. To evaluate them we need to create (initialize) a session.
The following code creates a Session object and then invokes its run method to run enough of the computational graph to evaluate node1 and node2. By running the computational graph in a session as follows:
In [4]:
# A Session is what actually executes graph nodes.
session = tf.Session()
values = session.run([node1, node2])
print(values)
We can perform algebraic computations. For example, sum node1 and node2
In [5]:
# Compose nodes: node3 is itself a Tensor representing the sum.
node3 = tf.add(node1, node2)
print("node 3 is:", node3)
result = session.run(node3)
print("session.run(node3):", result)
In [10]:
# Show the concrete Python types of the graph nodes.
# The original cell had a bare `type(node1)` before the print; since only a
# cell's LAST expression is displayed, that line was dead code and is removed.
print("node1 %s , node3 %s"%(type(node1),type(node3)))
In [11]:
# Placeholders are graph inputs whose values are supplied at run time
# via feed_dict.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = tf.add(a, b)  # the `+` operator is a shortcut for tf.add
In [12]:
type(a)  # last expression in the cell, so its value is shown as the cell output
Out[12]:
The preceding three lines are a bit like a function or a lambda in which we define two input parameters (a and b) and then an operation on them. We can evaluate this graph with multiple inputs by using the feed_dict argument to the run method to feed concrete values to the placeholders:
In [10]:
session.run(adder_node,{a:3,b:4.5})  # feed concrete scalar values into the placeholders
Out[10]:
In [14]:
session.run(adder_node, {a:[1,2,3,4] , b : [1,1,1,1]})  # placeholders also accept lists; addition is elementwise
Out[14]:
Even though the adder_node is made of placeholders, we can still combine it with any other node.
In [16]:
# Graphs compose: scale the adder's output by a constant factor.
add_and_triple = adder_node * 3.
feed = {a: 3, b: 4.5}
print(session.run(add_and_triple, feed))
In [14]:
## Variables
# Trainable model parameters for a linear model.
# NOTE(review): `b` re-binds the name previously used for a placeholder;
# adder_node still refers to the old tensor, but later cells feeding `b`
# as a placeholder would silently feed the wrong node — confirm intent.
W = tf.Variable([.3],dtype=tf.float32)
b = tf.Variable([-.3],dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W*x + b
$y = W x + b$
In [15]:
type(W)  # last expression in the cell, so it is displayed as the cell output
Out[15]:
In [17]:
Out[17]:
To initialize all the variables in TensorFlow we need to run the initializer op returned by tf.global_variables_initializer()
In [18]:
# Variables hold no value until the initializer op is actually run.
initializer = tf.global_variables_initializer()
session.run(initializer)
In [19]:
# Evaluate the linear model on a batch of inputs fed through placeholder x.
session.run(linear_model, {x: [0.3, 20, 36.4454, 4.33]})
Out[19]:
A loss function measures how far apart the current model is from the provided data. We'll use a standard loss model for linear regression, which sums the squares of the deltas between the current model and the provided data. linear_model - y creates a vector where each element is the corresponding example's error delta. We call tf.square to square that error. Then, we sum all the squared errors to create a single scalar that abstracts the error of all examples using tf.reduce_sum
In [20]:
# Sum-of-squared-errors loss between the model's predictions and targets y.
y = tf.placeholder(tf.float32)
deltas = linear_model - y           # per-example error
squared_deltas = tf.square(deltas)  # squared error
loss = tf.reduce_sum(squared_deltas)
print(session.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
In [26]:
type(linear_model)  # last expression in the cell, so it is displayed as the cell output
Out[26]:
In [27]:
# tf.reduce_sum?  -- interactive IPython help lookup removed from the final
# notebook (it is not valid Python outside IPython); see the TensorFlow API
# documentation for tf.reduce_sum instead.
In [32]:
# Build the training op: plain gradient descent with learning rate 0.01.
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
type(train)  # last expression in the cell, displayed as the cell output
Out[32]:
In [22]:
# Reset values
# Re-running the initializer restores W and b to their declared initial values.
session.run(initializer)
In [34]:
# Manually assign starting values to W and b, then run 100 gradient-descent
# steps on the toy data and print the resulting parameters.
# FIX: the loop body's indentation was lost in the notebook export
# (the `session.run(train, ...)` line was at column 0); restored here.
K = tf.assign(W, [-1])
G = tf.assign(b, [0])
session.run([K, G])
for i in range(100):
    session.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(session.run([W, b]))
In [41]:
# Evaluate the loss after training.
# The original cell's first line, a bare `session.run([W,b])`, was dead code:
# only a cell's last expression is displayed, so its result was discarded.
session.run([loss],{ x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
Out[41]:
In [38]:
## The full script
# Self-contained linear-regression example: model, loss, optimizer, and
# training loop, runnable on a fresh kernel.
# FIX: the training-loop body's indentation was lost in the notebook export
# (the `sess.run(train, ...)` line was at column 0); restored here.
import tensorflow as tf
# Model parameters
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init) # reset values to wrong
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})
# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
In [ ]: